I am hoping to automate creating animations based on an object and a number of camera positions. I’m stuck from the start. Could someone show me an example of how to change the camera position while looking at a target. For example looking at a cube and rotating a camera around the cube from a script? I have been playing with the get and set camera functions but I cannot get even a simple script to work that changes a couple of camera positions while keeping the camera looking at the target.
Or is this something that is outside of the scope of the scripting features?
@anton.bakker The short answer is no, you cannot script animations in KeyShot. All of the KeyShot Python commands related to animations are specifically for controlling the animation timeline.
That said, here are 3 camera scripts.
Essential - Get Camera Information
# AUTHOR LUX DT ChatGPT
# VERSION 1.2.4
# Get Camera settings and display the look-at point.
# Script to view camera details in a window in KeyShot.
import lux # Import the KeyShot built-in module 'lux'.
def get_camera_info(camera_name):
    """
    Retrieve and format information for a specified camera, including look-at point.

    :param camera_name: Name of the camera to get information about.
    :return: Formatted string containing camera details.
    """
    # Activate the requested camera so every lux getter below reads from it.
    lux.setCamera(camera_name)

    # Gather the raw camera settings.
    lens_info = lux.getCameraLens()
    fov = round(lux.getCameraFieldOfView(), 2)            # Field of view (degrees).
    focal_length = round(lux.getCameraFocalLength(), 2)   # Focal length (mm).
    direction = lux.getCameraDirection()
    distance = round(lux.getCameraDistance(), 2)          # Distance from pivot.
    position = lux.getCameraPosition()
    up_vector = lux.getCameraUp()
    look_at = lux.getCameraLookAt()

    # Translate the lens-mode constant into a readable label.
    lens_mode = lens_info.get('lens')
    if lens_mode == lux.CAMERA_TYPE_PERSPECTIVE:
        lens_label = 'Perspective'
    elif lens_mode == lux.CAMERA_TYPE_ORTHOGRAPHIC:
        lens_label = 'Orthographic'
    elif lens_mode == lux.CAMERA_TYPE_PANORAMA:
        lens_label = 'Panoramic'
    else:
        lens_label = 'Unknown'

    # Assemble the display string.
    return (
        "Lens Info:\n"
        f"FOV: {fov} degrees\n"
        f"Focal Length: {focal_length} mm\n"
        f"Lens Type: {lens_label}\n\n"
        f"Direction: {direction}\n"
        f"Distance: {distance}\n"
        f"Position: {position}\n"
        f"Up Vector: {up_vector}\n"
        f"Look-At Point: {look_at}"
    )
def display_camera_info(cam):
    """
    Display information of a selected camera.

    :param cam: Name of the camera to display information for.
    :return: Truthy when the user wants to go back to the camera list.
    """
    dialog_items = [
        (lux.DIALOG_LABEL, get_camera_info(cam)),                       # Read-only info text.
        ("backToList", lux.DIALOG_CHECK, "Back to Camera List", False), # Navigation checkbox.
    ]
    response = lux.getInputDialog(title=f"Camera Info: {cam}", values=dialog_items)
    # A cancelled dialog is falsy; otherwise report the checkbox state.
    return response and response.get("backToList", False)
def display_camera_selection():
    """
    Display a selection dialog for all cameras and show their information.
    """
    cameras = lux.getCameras()  # Names of every camera in the scene.
    while True:
        # One checkbox per camera.
        checkboxes = [(name, lux.DIALOG_CHECK, name, False) for name in cameras]
        selection = lux.getInputDialog(title="Select Camera", values=checkboxes)
        if not selection:
            # Dialog cancelled: leave the selection loop.
            break
        # Show info for each checked camera.
        for name, is_checked in selection.items():
            if not is_checked:
                continue
            if display_camera_info(name):
                # User asked to return to the camera list.
                break


# Execute the function to start the camera selection process.
display_camera_selection()
Create Cameras with Additional Functions
# AUTHOR LUX DT ChatGPT
# VERSION 1.1.1
# Template creates cameras with specific parameters.
import lux
def create_and_setup_camera(name, azimuth, distance, inclination):
    """
    Creates a new camera in KeyShot or updates an existing one, and sets it up
    with the given parameters. Assumes the distance is already in centimeters.
    """
    # Reuse the camera when it already exists; otherwise create a fresh one.
    if name in lux.getCameras():
        lux.setCamera(name)
    else:
        lux.newCamera(name)
    # Spherical placement: azimuth and inclination in degrees, no twist.
    lux.setSphericalCamera(azimuth=azimuth, incl=inclination, twist=0)
    # Distance from the camera to its pivot/look-at point.
    lux.setCameraDistance(dist=float(distance))
    # Persist the configured settings on the camera.
    lux.saveCamera()
    # Additional camera settings can be set here using other lux functions.
def set_camera_field_of_view(fov):
    """
    Sets the field of view for the active camera.

    :param fov: Field of view in degrees.
    """
    lux.setCameraFieldOfView(fov)
def set_camera_focal_length(focal_length):
    """
    Sets the focal length for the active camera.

    :param focal_length: Focal length in millimeters.
    """
    lux.setCameraFocalLength(focal_length)
# Camera optics keyed by camera name: field of view (degrees), focal length (mm).
camera_settings = {
    "RearFacing": {"fov": 10.286, "focal_length": 200},
    "FrontFacing": {"fov": 10.286, "focal_length": 200},
    "FrontRightFacing": {"fov": 24, "focal_length": 84.683},
    "RearRightFacing": {"fov": 24, "focal_length": 84.683},
}

# Camera placement: azimuth/inclination (degrees) and distance (cm).
camera_data = [
    {"name": "FrontFacing", "azimuth": -90.000, "distance": 885.390, "inclination": -0.000},
    {"name": "FrontRightFacing", "azimuth": -45.000, "distance": 375.466, "inclination": -10.000},
    {"name": "RearFacing", "azimuth": 90.000, "distance": 909.570, "inclination": -0.000},
    {"name": "RearRightFacing", "azimuth": 135.000, "distance": 375.467, "inclination": -10.000},
]

# Create/update each camera, then apply its optics when they are defined.
for cam in camera_data:
    create_and_setup_camera(cam["name"], cam["azimuth"], cam["distance"], cam["inclination"])
    optics = camera_settings.get(cam["name"], {})
    if optics:
        set_camera_field_of_view(optics["fov"])
        set_camera_focal_length(optics["focal_length"])
        lux.saveCamera()
# If KeyShot requires manual saving of the scene, add the save function here
# lux.saveScene() # Example, replace with actual function if required
Change the targets of the cameras created by the previous script.
# AUTHOR LUX DT ChatGPT
# VERSION 0.1.8
# Script updates the LookAt points of the specified cameras in KeyShot.
import lux
import luxmath
def change_camera_look_at(camera_name, look_at_point):
    """
    Point the named camera at *look_at_point*.

    :param camera_name: Name of an existing camera in the scene.
    :param look_at_point: XYZ target as a tuple or list.
    """
    # Bail out early when the camera is not part of the scene.
    if camera_name not in lux.getCameras():
        print(f"Camera '{camera_name}' not found.")
        return
    lux.setCamera(camera_name)
    # Validate the target type before handing it to lux.
    if not isinstance(look_at_point, (tuple, list)):
        print("Look-at point must be a tuple or list.")
        return
    lux.setCameraLookAt(pt=tuple(look_at_point))
    print(f"Changed the look-at point of camera '{camera_name}' to {look_at_point}.")
# Look-at targets (XYZ, scene units) keyed by camera name.
camera_lookat_settings = {
    "FrontFacing": (0.0, 0.5, 0.0),
    "FrontRightFacing": (0.0, 0.25, 0.0),
    "RearRightFacing": (0.0, 0.25, 0.0),
    "RearFacing": (0.0, 0.5, 0.0),
}

# Retarget every listed camera and persist each change.
for name, target in camera_lookat_settings.items():
    change_camera_look_at(name, target)
    lux.saveCamera()
# If KeyShot requires manual saving of the scene, add the save function here
# lux.saveScene() # Example, replace with actual function if required
@don.tuttle thank you for these examples. I am using ChatGPT for my attempts as well.
Let me clarify my plans. I'd like to automate the creation of sculpture animations (mpg files) through scripts. The input will be a sculpture with a set of desired keyframe XYZ camera positions. I place the sculpture in the scene and execute the script, which will read the desired camera keyframe positions. The script should animate/interpolate the camera through the set of keyframe positions and at some point render the frames from which to create the movie.
I am stuck at the beginning, how to iterate through the camera positions, while keeping the sculpture in the screen center view. It is not clear to me if that involves CameraLookAt, pivot point and CameraPosition while keeping control of the distance.
You are correct that setCameraLookAt() and setCameraPosition() are the main tools.
Just so I understand clearly, do you want to set keyframes via scripting? I assume yes since you mentioned interpolation.
If ChatGPT is telling you that you can set keyframes via scripting, it’s wrong.
You cannot create or edit keyframes through Python.
No lux.addKeyframe()
No ability to programmatically create animations for, Camera movement, Object translation/rotation/scaling, or Material animations. These must be created manually in the KeyShot GUI.
The Create Cameras with Additional Functions script I shared in my previous post provides all of the essential/scriptable camera settings/parameters.
If you want to define your own frame-by-frame camera positions you can script the setCameraLookAt() and setCameraPosition(), and setAnimationTime(), render each frame individually to create a sequence, and then assemble the frames into a movie.
Models, Groups, Parts, and Objects are all scripted differently. You may try to script a camera look at point on a model, but the script fails because the model is actually a group. Use lux.getSceneTree() to get the root of the scene tree to determine what you’re trying to set your look-at point to.
# AUTHOR LUX DT ChatGPT
# VERSION 1.2.1
# Traverse scene tree and print node and material information.
import lux
# Node type constants returned by lux scene-tree getKind().
NODE_TYPE_GROUP = 1
NODE_TYPE_MODEL = 5
NODE_TYPE_MODEL_SET = 4
NODE_TYPE_OBJECT = 2


def print_node_info(node, level=0):
    """
    Print basic information about a node.

    :param node: The node to print information about.
    :param level: The indentation level for pretty printing.
    """
    indent = ' ' * level
    # Map the kind constant to a readable label (same mapping as the constants above).
    labels = {
        NODE_TYPE_MODEL_SET: "Model Set",
        NODE_TYPE_GROUP: "Group",
        NODE_TYPE_MODEL: "Model",
        NODE_TYPE_OBJECT: "Part",
    }
    type_name = labels.get(node.getKind(), "Unknown")
    print(f"{indent}{node.getName()} (Type: {type_name})")
    print(f"{indent}Node Name: {node.getName()}")
    print(f"{indent}Node Type: {node.getKind()}")
def print_material_parameters(material, level=0):
    """
    Print parameters of the material.

    :param material: The material to print parameters for.
    :param level: The indentation level for pretty printing.
    """
    indent = ' ' * level
    if not hasattr(material, 'getName'):
        # Not an introspectable material object; print it verbatim.
        print(f"{indent}Material: {material}")
        return
    print(f"{indent}### Material: {material.getName()}")
    # One line per material parameter.
    for param in material.getParameters():
        print(f"{indent}- {param.getName()} (Type: {param.getType()}): {param.getValue()}")
def print_material_info(node, level=0):
    """
    Print material information for a given node.

    :param node: The node to print material information for.
    :param level: The indentation level for pretty printing.
    """
    indent = ' ' * level
    try:
        material = node.getMaterial()
        if not material:
            print(f"{indent}No material assigned to '{node.getName()}'.")
        elif hasattr(material, 'getMaterialGraph'):
            graph = material.getMaterialGraph()
            if graph:
                print(f"{indent}Material graph found for part '{node.getName()}':")
                print_material_graph_parameters(graph, level + 1)
            else:
                print(f"{indent}No material graph found for part '{node.getName()}'.")
        else:
            # Plain material without a graph: dump its parameters directly.
            print_material_parameters(material, level)
    except Exception as e:
        # getMaterial() may fail for some node kinds; report and keep traversing.
        print(f"{indent}Failed to get material for '{node.getName()}': {e}")
def traverse_scene_tree(node=None, level=0):
    """
    Recursively traverse the scene tree and print node information.

    :param node: The starting node of the scene tree. If None, start from the root.
    :param level: The current level in the tree (used for indentation).
    """
    if node is None:
        node = lux.getSceneTree()  # Start from the root of the scene tree.
    print_node_info(node, level)
    kind = node.getKind()
    # Parts, groups, and models can carry materials.
    if kind in (NODE_TYPE_OBJECT, NODE_TYPE_GROUP, NODE_TYPE_MODEL):
        print_material_info(node, level)
    # Container kinds can have children; descend one level deeper.
    if kind in (NODE_TYPE_GROUP, NODE_TYPE_MODEL, NODE_TYPE_MODEL_SET):
        for child in node.getChildren():
            traverse_scene_tree(child, level + 1)
def print_material_graph_parameters(material_graph, level=0):
    """
    Print parameters of all nodes in the material graph.

    :param material_graph: The material graph to inspect.
    :param level: The indentation level for pretty printing.
    """
    indent = ' ' * level
    # Walk every shader node in the graph and list its parameters.
    for shader_node in material_graph.getNodes():
        print(f"{indent}Shader Node: {shader_node.getName()}")
        for param in shader_node.getParameters():
            print(f"{indent} Parameter Name: {param.getName()}, Type: {param.getType()}, Value: {param.getValue()}")


# Example usage: Print the entire scene tree with node information
traverse_scene_tree()
I don’t use regular ChatGPT, I created 2 custom KeyShot GPT assistants. They know all the best practices and constraints of KeyShot Python and have over 100 scripting examples. If you’re an OpenAI subscriber they are available on the GPT store, links are below.
I made two test script versions: Spherical and Absolute based cameras.
Here are the two scripts. The Spherical scripts works, the Absolute script does not. I would prefer the Absolute approach rather than the Spherical approach.
Could you take a look on what may be wrong with the Absolute script?
def orbit_object_absolute(object_name="Trifecta", steps=100, delay=0.05, inclination_deg=10.0):
    """
    Orbit the camera a full 360° around *object_name* at a fixed inclination,
    using absolute camera positions and re-aiming at the object every step.

    :param object_name: Scene-tree name of the target object.
    :param steps: Number of camera positions along the circle.
    :param delay: Pause (seconds) between steps.
    :param inclination_deg: Elevation above the horizontal, in degrees.
    :raises Exception: If the object is not found in the scene.
    """
    # BUG FIX: the pasted version used Unicode "smart quotes" around the string
    # literals, which is a Python syntax error; replaced with ASCII quotes.
    root = lux.getSceneTree()
    target_list = root.find(name=object_name)
    if not target_list:
        raise Exception(f"Object '{object_name}' not found in the scene.")
    target = target_list[0]
    center_vec = target.getCenter(world=True)
    center = luxmath.Vector(center_vec.x, center_vec.y, center_vec.z)
    # Keep the current camera distance as the orbit radius.
    distance = lux.getCameraDistance()
    inclination_rad = math.radians(inclination_deg)
    # Split the radius into a vertical offset and a horizontal ring radius.
    vertical_offset = math.sin(inclination_rad) * distance
    horizontal_radius = math.cos(inclination_rad) * distance
    for step in range(steps):
        angle = -math.pi + (2 * math.pi * step / steps)  # Full 360° in radians
        # Camera position on the inclined circle around the object.
        x = center.x + math.cos(angle) * horizontal_radius
        y = center.y + vertical_offset
        z = center.z + math.sin(angle) * horizontal_radius
        position = (x, y, z)
        # Place the camera and aim it back at the object's center.
        lux.setCameraPosition(pos=position)
        lux.setCameraLookAt(pt=(center.x, center.y, center.z))
        print(f"Step {step+1}/{steps} | Position: {position}")
        time.sleep(delay)
def orbit_absolute(object_name="Trifecta", steps=20, delay=0.2, height_offset=0.0):
    """
    Move the camera along a horizontal circle around *object_name*, aiming at
    its center at every step.

    :param object_name: Scene-tree name of the target object.
    :param steps: Number of camera positions along the circle.
    :param delay: Pause (seconds) between steps.
    :param height_offset: Constant Y offset added to the camera height.
    :raises Exception: If the object is not found in the scene.
    """
    # BUG FIX: the pasted version used Unicode "smart quotes" around the string
    # literals, which is a Python syntax error; replaced with ASCII quotes.
    root = lux.getSceneTree()
    target_list = root.find(name=object_name)
    if not target_list:
        raise Exception(f"Object '{object_name}' not found in the scene.")
    target = target_list[0]
    center_vec = target.getCenter(world=True)
    center = luxmath.Vector(center_vec.x, center_vec.y, center_vec.z)
    # Orbit at the current camera distance.
    distance = lux.getCameraDistance()
    for i in range(steps):
        # Angle in radians from -pi to +pi.
        angle = -math.pi + 2 * math.pi * (i / steps)
        # Camera position on the circle around the object.
        cam_x = center.x + math.cos(angle) * distance
        cam_y = center.y + height_offset
        cam_z = center.z + math.sin(angle) * distance
        cam_pos = (cam_x, cam_y, cam_z)
        # Place the camera and aim it back at the object's center.
        lux.setCameraPosition(pos=cam_pos)
        lux.setCameraLookAt(pt=(center.x, center.y, center.z))
        print(f"Step {i+1}/{steps} | Camera Pos: {cam_pos}")
        time.sleep(delay)
One way to do this would be to use a CSV file with the sequential Camera Settings. Process:
Set Timeline Keyframe
Read CSV and Update Active Camera
Queue Image per CSV, frame number, or camera name
Repeat
Below are 3 scripts you can extend for the aforementioned functionality.
Save Camera Metadata to CSV
Reads Camera CSV
Batch Render Incremental Screenshots
The three scripts follow below.
Get Camera Info Save Metadata
# AUTHOR LUX DT ChatGPT
# VERSION 1.1.0
# This script will cycle through all the cameras in a scene and save their Metadata in the .meta format.
# A second accompanying script "Replicate Camera From METADATA" will do as the name implies.
import lux
import os
# Function to retrieve and format camera settings
def get_camera_metadata(camera_name):
    """
    Collect the named camera's settings into a dict (activates the camera).

    :param camera_name: Name of the camera to inspect.
    :return: Dict of camera settings keyed by setting name.
    """
    lux.setCamera(camera_name)  # Make the camera active so getters read from it.
    lens_info = lux.getCameraLens()
    return {
        'name': camera_name,
        'lens_type': lens_info.get('lens', 'Unknown'),
        'focal_length': lux.getCameraFocalLength(),
        'fov': lux.getCameraFieldOfView(),
        'position': lux.getCameraPosition(),
        'direction': lux.getCameraDirection(),
        'distance': lux.getCameraDistance(),
        'up_vector': lux.getCameraUp(),
        'look_at': lux.getCameraLookAt(),
    }
# Function to format the metadata as .metadata
def format_as_metadata(metadata):
    """
    Render a camera-settings dict as the human-readable .metadata text format.

    :param metadata: Dict of camera settings.
    :return: Multi-line string with a header, "key: value" lines, and a footer.
    """
    lines = ["Camera Metadata", "===================="]
    lines.extend(f"{key}: {value}" for key, value in metadata.items())
    lines.append("====================")
    # Every line (including the footer) ends with a newline.
    return "\n".join(lines) + "\n"
# Function to save the camera metadata to a .metadata file
def save_metadata_to_file(camera_metadata, filename):
    """
    Write the formatted metadata string to *filename*, overwriting any
    existing file.
    """
    with open(filename, 'w') as out_file:
        out_file.write(camera_metadata)
# Function to handle the overall camera metadata extraction and saving
def save_all_camera_metadata(directory):
    """
    Export the metadata of every camera in the scene to *directory*, one
    "<camera>.metadata" file per camera, then show a confirmation box.

    :param directory: Destination folder for the .metadata files.
    """
    for camera in lux.getCameras():
        metadata = get_camera_metadata(camera)
        # Format the metadata as .metadata text.
        metadata_str = format_as_metadata(metadata)
        # One file per camera, named after the camera.
        filename = os.path.join(directory, f"{camera}.metadata")
        save_metadata_to_file(metadata_str, filename)
    # BUG FIX: the f-string had no placeholder (the literal text "(unknown)");
    # report the actual destination directory to the user.
    lux.getMessageBox(f"Saved camera metadata to {directory}", "Success", lux.MESSAGE_BOX_INFO)
# Main function to retrieve the user directory and execute the script
def main():
    """Ask the user for a destination folder, then export all camera metadata."""
    # Default to KeyShot's scripts folder.
    default_directory = lux.getKeyShotFolder(lux.FOLDER_SCRIPTS)
    values = [("save_directory", lux.DIALOG_FOLDER, "Select directory to save camera metadata:", default_directory)]
    user_input = lux.getInputDialog(title="Save Directory", desc="Choose the directory to save metadata files:", values=values)
    if not user_input:
        # Dialog cancelled.
        lux.getMessageBox("No directory selected. Script canceled.", "Error", lux.MESSAGE_BOX_CRITICAL)
        return
    save_all_camera_metadata(user_input.get('save_directory', default_directory))


# Run the main function
main()
Read CSV
# AUTHOR LUX DT ChatGPT
# VERSION 1.1.0
# This script replicates Cameras from a METADATA file saved by the 'Get Camera Info Save METADATA.
import lux
import os
import re
# Function to select the .metadata file
def select_metadata_file():
    """Open a file dialog and return the chosen .metadata path, or None if cancelled."""
    values = [("file_path", lux.DIALOG_FILE, "Select Metadata file:", "*.metadata", "Metadata files (*.metadata)")]
    opts = lux.getInputDialog(title="Select Metadata File", desc="Select a .metadata camera file.", values=values)
    if opts and "file_path" in opts:
        return opts["file_path"]
    return None
# Function to parse the .metadata file
def parse_metadata_file(file_path):
    """
    Parse a .metadata file written by the companion export script.

    :param file_path: Path to the .metadata file.
    :return: Dict of camera settings with numeric and tuple values restored.
    :raises AttributeError: If a required "key: value" line is missing.
    """
    import ast  # Local import: safe literal parsing of the tuple values.

    with open(file_path, 'r') as file:
        content = file.read()

    def field(pattern):
        # Capture group 1 of the first "key: value" line matching *pattern*.
        return re.search(pattern, content).group(1)

    # SECURITY FIX: the original used eval() on file content, which executes
    # arbitrary code from a crafted .metadata file. ast.literal_eval only
    # accepts Python literals (tuples, numbers, strings), which is all the
    # exporter ever writes.
    return {
        'name': field(r'name:\s(.+)'),
        'lens_type': field(r'lens_type:\s([\d]+)'),
        'focal_length': float(field(r'focal_length:\s([\d.-]+)')),
        'fov': float(field(r'fov:\s([\d.-]+)')),
        'position': ast.literal_eval(field(r'position:\s(.+)')),
        'direction': ast.literal_eval(field(r'direction:\s(.+)')),
        'distance': float(field(r'distance:\s([\d.-]+)')),
        'up_vector': ast.literal_eval(field(r'up_vector:\s(.+)')),
        'look_at': ast.literal_eval(field(r'look_at:\s(.+)')),
    }
# Function to apply camera settings in KeyShot
def apply_camera_settings(settings):
    """
    Create a camera in KeyShot and configure it from a parsed settings dict.

    :param settings: Dict produced by parse_metadata_file().
    """
    camera_name = settings['name']
    lux.newCamera(camera_name)   # Create the camera...
    lux.setCamera(camera_name)   # ...and make it active.
    # Lens type: 16384 is assumed to be the perspective constant;
    # everything else falls back to orthographic.
    if settings['lens_type'] == '16384':
        lux.setCameraPerspective()
    else:
        lux.setCameraOrthographic()
    # Apply the remaining camera properties.
    lux.setCameraFocalLength(settings['focal_length'])
    lux.setCameraFieldOfView(settings['fov'])
    lux.setCameraPosition(settings['position'])
    lux.setCameraDirection(settings['direction'])
    lux.setCameraDistance(settings['distance'])
    lux.setCameraUp(settings['up_vector'])
    lux.setCameraLookAt(pt=settings['look_at'])
    lux.saveCamera()
    print(f"Camera '{camera_name}' created and configured based on .metadata file.")
# Main function to run the script
def main():
    """Select a .metadata file, parse it, and replicate the camera in KeyShot."""
    file_path = select_metadata_file()
    if not file_path:
        lux.getMessageBox("No .metadata file selected.", "Error", lux.MESSAGE_BOX_CRITICAL)
        return
    settings = parse_metadata_file(file_path)
    if not settings:
        lux.getMessageBox("Failed to extract camera settings from the .metadata file.", "Error", lux.MESSAGE_BOX_CRITICAL)
        return
    apply_camera_settings(settings)
    lux.getMessageBox(f"Camera '{settings['name']}' created successfully!", "Success", lux.MESSAGE_BOX_INFO)


# Run the main function
main()
Batch Render Incremental Screenshots
# AUTHOR LUX DT ChatGPT
# VERSION 0.1.1
# Batch render incremental screenshots.
import lux
def take_and_save_screenshots(start_frame, end_frame, frame_step):
    """
    Step the animation timeline from start to end (inclusive) and save a
    screenshot of every visited frame.
    """
    for frame in range(start_frame, end_frame + 1, frame_step):
        # Jump the timeline to the frame, then capture immediately.
        lux.setAnimationFrame(frame)
        print(f"Frame {frame} set.")
        screenshot_path = lux.screenshot()
        print(f"Screenshot saved: {screenshot_path}")
def main():
    """Prompt for a frame range and capture a screenshot across it."""
    dialog_values = [
        ("start_frame", lux.DIALOG_INTEGER, "Start Frame:", 0, (0, 10000)),
        ("end_frame", lux.DIALOG_INTEGER, "End Frame:", 10, (0, 10000)),
        ("frame_step", lux.DIALOG_INTEGER, "Frame Step:", 1, (1, 100)),
    ]
    user_input = lux.getInputDialog(title="Capture Animation Frames",
                                    desc="Set frame range for screenshots. Starting at frame 0 is recommended.",
                                    values=dialog_values)
    if not user_input:
        print("User cancelled the dialog.")
        return
    take_and_save_screenshots(int(user_input['start_frame']),
                              int(user_input['end_frame']),
                              int(user_input['frame_step']))


if __name__ == "__main__":
    main()
# AUTHOR LUX DT ChatGPT
# VERSION 1.3.2
# Queue renders and name per camera (cleaned names without numeric suffix).
# NOTE Queued renders will have a long suffix but will be named per Camera.
import lux
import re
def clean_camera_name(camera_name):
    """
    Strip a trailing numeric suffix (e.g. '_123456' or '-07') from a camera name.

    :param camera_name: Raw camera name as returned by lux.getCameras().
    :return: Name with any trailing "_digits" / "-digits" removed.
    """
    # Only a suffix anchored at the end of the string is removed.
    return re.sub(r'[_-]\d+$', '', camera_name)
def render_with_camera_based_name(output_folder):
    """
    Queue a 1920x1080 render for every camera in the scene, with the output
    file named after the (cleaned) camera name.

    :param output_folder: Folder where the queued renders will be written.
    """
    render_options = lux.getRenderOptions()
    render_options.setAddToQueue(True)  # Queue renders instead of running them now.
    for camera_name in lux.getCameras():
        lux.setCamera(camera_name)
        # Verify the camera switch actually took effect before queueing.
        if lux.getCamera() != camera_name:
            print(f"Failed to set camera: {camera_name}")
            continue
        # Drop any numeric suffix from the camera name for the file name.
        cleaned_name = clean_camera_name(camera_name)
        file_name = f"{output_folder}/{cleaned_name}_render.png"
        lux.renderImage(file_name, width=1920, height=1080, opts=render_options)
        print(f"Queued render for camera '{cleaned_name}' as: {file_name}")


# Example usage
output_folder = "C:/Users/Public/Documents/KeyShot Studio/Renderings/"
render_with_camera_based_name(output_folder)
@don.tuttle OMG, I am so on my way now! Thanks for all your help!
I have been struggling for days with the Absolute camera approach. I suspect there may be a bug with some of those functions. However, with the Spherical camera approach everything started working just fine. I will build my tools using absolute data and just convert it to what the spherical cameras want - no big deal.
Thanks again, I’ll ping you if I get into trouble again (quite likely)
Really nice work Anton! Love the chrome steel ones reflecting their environment. The marble sculptures look incredible fragile. Not sure if it’s an error or not but couldn’t click the digital works.
@don.tuttle I am good to go from here using the spherical camera functions. There might be a bug in the python absolute functions. How do I formally submit a potential bug report?
Here are the two code blocks again. The Spherical code block works fine; the Absolute code block does not. While I have a workaround for now, I'd like to see what it would take to get it looked into and corrected, since we are so close to identifying the issue.
Spherical cameras - works
import lux
import luxmath
import time
import math
def orbit_object_absolute(object_name="Trifecta", steps=100, delay=0.05, inclination_deg=10.0):
    """
    Sweep the camera through a full circle around *object_name* at a fixed
    inclination, keeping the current camera distance as the orbit radius and
    re-aiming at the object's center every step.
    """
    # Locate the target node in the scene tree.
    matches = lux.getSceneTree().find(name=object_name)
    if not matches:
        raise Exception(f"Object '{object_name}' not found in the scene.")
    c = matches[0].getCenter(world=True)
    center = luxmath.Vector(c.x, c.y, c.z)
    radius = lux.getCameraDistance()  # Preserve the current orbit radius.
    incl = math.radians(inclination_deg)
    # Decompose the radius into height above center and horizontal ring radius.
    height = math.sin(incl) * radius
    ring = math.cos(incl) * radius
    for step in range(steps):
        theta = -math.pi + (2 * math.pi * step / steps)  # Full 360° in radians
        position = (center.x + math.cos(theta) * ring,
                    center.y + height,
                    center.z + math.sin(theta) * ring)
        # Place the camera, then aim it back at the object's center.
        lux.setCameraPosition(pos=position)
        lux.setCameraLookAt(pt=(center.x, center.y, center.z))
        print(f"Step {step+1}/{steps} | Position: {position}")
        time.sleep(delay)


# Run the orbit
orbit_object_absolute()
Absolute cameras - does not work properly
import lux
import luxmath
import time
import math
def orbit_absolute(object_name="Trifecta", steps=20, delay=0.2, height_offset=0.0):
    """
    Move the camera along a horizontal circle around *object_name* using
    absolute positions, aiming at the object's center at every step.
    """
    # Locate the target node in the scene tree.
    matches = lux.getSceneTree().find(name=object_name)
    if not matches:
        raise Exception(f"Object '{object_name}' not found in the scene.")
    c = matches[0].getCenter(world=True)
    center = luxmath.Vector(c.x, c.y, c.z)
    radius = lux.getCameraDistance()  # Orbit at the current camera distance.
    for i in range(steps):
        theta = -math.pi + 2 * math.pi * (i / steps)  # -pi .. +pi
        cam_pos = (center.x + math.cos(theta) * radius,
                   center.y + height_offset,
                   center.z + math.sin(theta) * radius)
        # Place the camera, then aim it back at the object's center.
        lux.setCameraPosition(pos=cam_pos)
        lux.setCameraLookAt(pt=(center.x, center.y, center.z))
        print(f"Step {i+1}/{steps} | Camera Pos: {cam_pos}")
        time.sleep(delay)


# Run orbit
orbit_absolute()
Thanks Oscar @oscar.rottink and thank you for pointing out the broken digital link. I think I fixed it.
The videos shown are not all done with Keyshot. Some of them are done with Houdini (not by me).
The animation scripting tool is to get ready to attempt generating beautiful videos in Keyshot. I could use some help of someone showing me how to do this nicely in Keyshot. Let me know if you know of someone I could approach for that.
Great to hear you guys are on it. And very glad that spherical camera is a good work around. I have made significant progress in my animation script tooling now that I have a workaround.